Simple mechanical renaming: 'guestos' -> 'kernel'.
Signed-off-by: keir.fraser@cl.cam.ac.uk
}
#else
-#error no guestos SMP support yet...
+#error no kernel SMP support yet...
#include <asm/smp.h>
#define local_flush_tlb() \
for ( i = 0; i < 256; i++ )
{
ctxt.trap_ctxt[i].vector = i;
- ctxt.trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
}
ctxt.fast_trap_idx = smp_trap_init(ctxt.trap_ctxt);
}
/* Ring 1 stack is the initial stack. */
- ctxt.guestos_ss = __KERNEL_DS;
- ctxt.guestos_esp = idle->thread.esp;
+ ctxt.kernel_ss = __KERNEL_DS;
+ ctxt.kernel_esp = idle->thread.esp;
/* Callback handlers. */
ctxt.event_callback_cs = __KERNEL_CS;
/* Simple and small GDT entries for booting only */
-#define __BOOT_CS FLAT_GUESTOS_CS
+#define __BOOT_CS FLAT_KERNEL_CS
-#define __BOOT_DS FLAT_GUESTOS_DS
+#define __BOOT_DS FLAT_KERNEL_DS
/*
* The interrupt descriptor table has room for 256 idt's,
char *elfbase, int xch, u32 dom, unsigned long *parray,
struct domain_setup_info *dsi);
-static int setup_guestos(int xc_handle,
+static int setup_guest(int xc_handle,
u32 dom,
char *image, unsigned long image_size,
gzFile initrd_gfd, unsigned long initrd_len,
goto error_out;
}
- if ( setup_guestos(xc_handle, domid, image, image_size,
+ if ( setup_guest(xc_handle, domid, image, image_size,
initrd_gfd, initrd_size, nr_pages,
&vstartinfo_start, &vkern_entry,
ctxt, cmdline,
/*
* Initial register values:
- * DS,ES,FS,GS = FLAT_GUESTOS_DS
- * CS:EIP = FLAT_GUESTOS_CS:start_pc
- * SS:ESP = FLAT_GUESTOS_DS:start_stack
+ * DS,ES,FS,GS = FLAT_KERNEL_DS
+ * CS:EIP = FLAT_KERNEL_CS:start_pc
+ * SS:ESP = FLAT_KERNEL_DS:start_stack
* ESI = start_info
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
* EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
*/
- ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
+ ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
ctxt->cpu_ctxt.eip = vkern_entry;
ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE;
ctxt->cpu_ctxt.esi = vstartinfo_start;
for ( i = 0; i < 256; i++ )
{
ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
}
ctxt->fast_trap_idx = 0;
ctxt->gdt_ents = 0;
/* Ring 1 stack is the initial stack. */
- ctxt->guestos_ss = FLAT_GUESTOS_DS;
- ctxt->guestos_esp = vstartinfo_start + 2*PAGE_SIZE;
+ ctxt->kernel_ss = FLAT_KERNEL_DS;
+ ctxt->kernel_esp = vstartinfo_start + 2*PAGE_SIZE;
/* No debugging. */
memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
/* No callback handlers. */
- ctxt->event_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->event_callback_cs = FLAT_KERNEL_CS;
ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
ctxt->failsafe_callback_eip = 0;
memset( &launch_op, 0, sizeof(launch_op) );
* 4. fast_trap_idx is checked by Xen.
* 5. ldt base must be page-aligned, no more than 8192 ents, ...
* 6. gdt already done, and further checking is done by Xen.
- * 7. check that guestos_ss is safe.
+ * 7. check that kernel_ss is safe.
* 8. pt_base is already done.
* 9. debugregs are checked by Xen.
* 10. callback code selectors need checking.
{
ctxt.trap_ctxt[i].vector = i;
if ( (ctxt.trap_ctxt[i].cs & 3) == 0 )
- ctxt.trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
}
- if ( (ctxt.guestos_ss & 3) == 0 )
- ctxt.guestos_ss = FLAT_GUESTOS_DS;
+ if ( (ctxt.kernel_ss & 3) == 0 )
+ ctxt.kernel_ss = FLAT_KERNEL_DS;
if ( (ctxt.event_callback_cs & 3) == 0 )
- ctxt.event_callback_cs = FLAT_GUESTOS_CS;
+ ctxt.event_callback_cs = FLAT_KERNEL_CS;
if ( (ctxt.failsafe_callback_cs & 3) == 0 )
- ctxt.failsafe_callback_cs = FLAT_GUESTOS_CS;
+ ctxt.failsafe_callback_cs = FLAT_KERNEL_CS;
if ( ((ctxt.ldt_base & (PAGE_SIZE - 1)) != 0) ||
(ctxt.ldt_ents > 8192) ||
(ctxt.ldt_base > HYPERVISOR_VIRT_START) ||
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
static int
-setup_guestos(int xc_handle,
+setup_guest(int xc_handle,
u32 dom,
gzFile kernel_gfd,
unsigned long tot_pages,
}
DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages));
- if (setup_guestos(xc_handle, domid, kernel_gfd, tot_pages,
+ if (setup_guest(xc_handle, domid, kernel_gfd, tot_pages,
&virt_startinfo_addr,
&load_addr, &st_ctxt, cmdline,
op.u.getdomaininfo.shared_info_frame,
/*
* Initial register values:
- * DS,ES,FS,GS = FLAT_GUESTOS_DS
- * CS:EIP = FLAT_GUESTOS_CS:start_pc
- * SS:ESP = FLAT_GUESTOS_DS:start_stack
+ * DS,ES,FS,GS = FLAT_KERNEL_DS
+ * CS:EIP = FLAT_KERNEL_CS:start_pc
+ * SS:ESP = FLAT_KERNEL_DS:start_stack
* ESI = start_info
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
* EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
*/
- ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
- ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
+ ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
+ ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
ctxt->cpu_ctxt.eip = load_addr;
ctxt->cpu_ctxt.eip = 0x80100020;
/* put stack at top of second page */
/* Virtual IDT is empty at start-of-day. */
for (i = 0; i < 256; i++) {
ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
}
ctxt->fast_trap_idx = 0;
/* Ring 1 stack is the initial stack. */
/* put stack at top of second page */
- ctxt->guestos_ss = FLAT_GUESTOS_DS;
- ctxt->guestos_esp = ctxt->cpu_ctxt.esp;
+ ctxt->kernel_ss = FLAT_KERNEL_DS;
+ ctxt->kernel_esp = ctxt->cpu_ctxt.esp;
/* No debugging. */
memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
/* No callback handlers. */
- ctxt->event_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->event_callback_cs = FLAT_KERNEL_CS;
ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
ctxt->failsafe_callback_eip = 0;
memset(&launch_op, 0, sizeof (launch_op));
mem_mapp->nr_map = nr_map;
}
-static int setup_guestos(int xc_handle,
+static int setup_guest(int xc_handle,
u32 dom, int memsize,
char *image, unsigned long image_size,
gzFile initrd_gfd, unsigned long initrd_len,
goto error_out;
}
- if ( setup_guestos(xc_handle, domid, memsize, image, image_size,
+ if ( setup_guest(xc_handle, domid, memsize, image, image_size,
initrd_gfd, initrd_size, nr_pages,
ctxt, cmdline,
op.u.getdomaininfo.shared_info_frame,
for ( i = 0; i < 256; i++ )
{
ctxt->trap_ctxt[i].vector = i;
- ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
+ ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS;
}
ctxt->fast_trap_idx = 0;
/* Ring 1 stack is the initial stack. */
/*
- ctxt->guestos_ss = FLAT_GUESTOS_DS;
- ctxt->guestos_esp = vstartinfo_start;
+ ctxt->kernel_ss = FLAT_KERNEL_DS;
+ ctxt->kernel_esp = vstartinfo_start;
*/
/* No debugging. */
memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
/* No callback handlers. */
- ctxt->event_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->event_callback_cs = FLAT_KERNEL_CS;
ctxt->event_callback_eip = 0;
- ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
+ ctxt->failsafe_callback_cs = FLAT_KERNEL_CS;
ctxt->failsafe_callback_eip = 0;
memset( &launch_op, 0, sizeof(launch_op) );
sizeof(ed->arch.user_ctxt));
if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
c->flags |= ECF_I387_VALID;
- if ( GUESTOS_MODE(ed, &ed->arch.user_ctxt) )
- c->flags |= ECF_IN_GUESTOS;
+ if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
+ c->flags |= ECF_IN_KERNEL;
memcpy(&c->fpu_ctxt,
&ed->arch.i387,
sizeof(ed->arch.i387));
l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
c->gdt_ents = GET_GDT_ENTRIES(ed);
}
- c->guestos_ss = ed->arch.guestos_ss;
- c->guestos_esp = ed->arch.guestos_sp;
+ c->kernel_ss = ed->arch.kernel_ss;
+ c->kernel_esp = ed->arch.kernel_sp;
c->pt_base =
pagetable_val(ed->arch.pagetable);
memcpy(c->debugreg,
mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
#endif
- ed->arch.flags = TF_guestos_mode;
+ ed->arch.flags = TF_kernel_mode;
}
}
ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
ed->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
- ed->arch.flags = TF_guestos_mode;
+ ed->arch.flags = TF_kernel_mode;
}
#ifdef CONFIG_VMX
ed->arch.monitor_table = mk_pagetable(0);
}
-static int vmx_final_setup_guestos(struct exec_domain *ed,
+static int vmx_final_setup_guest(struct exec_domain *ed,
full_execution_context_t *full_context)
{
int error;
}
#endif
-int arch_final_setup_guestos(
+int arch_final_setup_guest(
struct exec_domain *d, full_execution_context_t *c)
{
unsigned long phys_basetab;
if ( c->flags & ECF_I387_VALID )
set_bit(EDF_DONEFPUINIT, &d->ed_flags);
- d->arch.flags &= ~TF_guestos_mode;
- if ( c->flags & ECF_IN_GUESTOS )
- d->arch.flags |= TF_guestos_mode;
+ d->arch.flags &= ~TF_kernel_mode;
+ if ( c->flags & ECF_IN_KERNEL )
+ d->arch.flags |= TF_kernel_mode;
memcpy(&d->arch.user_ctxt,
&c->cpu_ctxt,
d->arch.ldt_base = c->ldt_base;
d->arch.ldt_ents = c->ldt_ents;
- d->arch.guestos_ss = c->guestos_ss;
- d->arch.guestos_sp = c->guestos_esp;
+ d->arch.kernel_ss = c->kernel_ss;
+ d->arch.kernel_sp = c->kernel_esp;
for ( i = 0; i < 8; i++ )
(void)set_debugreg(d, i, c->debugreg[i]);
#ifdef CONFIG_VMX
if (c->flags & ECF_VMX_GUEST)
- return vmx_final_setup_guestos(d, c);
+ return vmx_final_setup_guest(d, c);
#endif
return 0;
/*
* Initial register values:
- * DS,ES,FS,GS = FLAT_GUESTOS_DS
- * CS:EIP = FLAT_GUESTOS_CS:start_pc
- * SS:ESP = FLAT_GUESTOS_SS:start_stack
+ * DS,ES,FS,GS = FLAT_KERNEL_DS
+ * CS:EIP = FLAT_KERNEL_CS:start_pc
+ * SS:ESP = FLAT_KERNEL_SS:start_stack
* ESI = start_info
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
*/
- ec->ds = ec->es = ec->fs = ec->gs = FLAT_GUESTOS_DS;
- ec->ss = FLAT_GUESTOS_SS;
- ec->cs = FLAT_GUESTOS_CS;
+ ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS;
+ ec->ss = FLAT_KERNEL_SS;
+ ec->cs = FLAT_KERNEL_CS;
ec->eip = start_pc;
ec->esp = start_stack;
ec->esi = start_info;
#ifdef __i386__
/* Switch the guest OS ring-1 stack. */
- tss->esp1 = next_p->arch.guestos_sp;
- tss->ss1 = next_p->arch.guestos_ss;
+ tss->esp1 = next_p->arch.kernel_sp;
+ tss->ss1 = next_p->arch.kernel_ss;
#endif
/* Switch page tables. */
next_p->arch.user_ctxt.gs_base_app>>32);
/* If in guest-OS mode, switch the GS bases around. */
- if ( next_p->arch.flags & TF_guestos_mode )
+ if ( next_p->arch.flags & TF_kernel_mode )
__asm__ __volatile__ ( "swapgs" );
if ( unlikely(!all_segs_okay) )
{
unsigned long *rsp =
- (next_p->arch.flags & TF_guestos_mode) ?
+ (next_p->arch.flags & TF_kernel_mode) ?
(unsigned long *)stack_ec->rsp :
- (unsigned long *)next_p->arch.guestos_sp;
+ (unsigned long *)next_p->arch.kernel_sp;
if ( put_user(stack_ec->ss, rsp- 1) |
put_user(stack_ec->rsp, rsp- 2) |
domain_crash();
}
- if ( !(next_p->arch.flags & TF_guestos_mode) )
+ if ( !(next_p->arch.flags & TF_kernel_mode) )
{
- next_p->arch.flags |= TF_guestos_mode;
+ next_p->arch.flags |= TF_kernel_mode;
__asm__ __volatile__ ( "swapgs" );
/* XXX switch page tables XXX */
}
/* Emulate some simple privileged instructions when exec'ed in ring 1. */
if ( (regs->error_code == 0) &&
- GUESTOS_MODE(ed, regs) &&
+ KERNEL_MODE(ed, regs) &&
emulate_privileged_op(regs) )
return 0;
shadow_cr = host_env->cr0;
shadow_cr &= ~(X86_CR0_PE | X86_CR0_PG);
error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
- /* CR3 is set in vmx_final_setup_guestos */
+ /* CR3 is set in vmx_final_setup_guest */
error |= __vmwrite(GUEST_CR4, host_env->cr4);
shadow_cr = host_env->cr4;
shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
* We're basically forcing default RPLs to 1, so that our "what privilege
* level are we returning to?" logic works.
*/
- ed->arch.failsafe_selector = FLAT_GUESTOS_CS;
- ed->arch.event_selector = FLAT_GUESTOS_CS;
- ed->arch.guestos_ss = FLAT_GUESTOS_SS;
+ ed->arch.failsafe_selector = FLAT_KERNEL_CS;
+ ed->arch.event_selector = FLAT_KERNEL_CS;
+ ed->arch.kernel_ss = FLAT_KERNEL_SS;
for ( i = 0; i < 256; i++ )
- ed->arch.traps[i].cs = FLAT_GUESTOS_CS;
+ ed->arch.traps[i].cs = FLAT_KERNEL_CS;
/* WARNING: The new domain must have its 'processor' field filled in! */
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
if ( (ss & 3) == 0 )
return -EPERM;
- current->arch.guestos_ss = ss;
- current->arch.guestos_sp = esp;
+ current->arch.kernel_ss = ss;
+ current->arch.kernel_sp = esp;
t->ss1 = ss;
t->esp1 = esp;
/*
* We don't allow a DPL of zero. There is no legitimate reason for
* specifying DPL==0, and it gets rather dangerous if we also accept call
- * gates (consider a call gate pointing at another guestos descriptor with
+ * gates (consider a call gate pointing at another kernel descriptor with
* DPL 0 -- this would get the OS ring-0 privileges).
*/
if ( (b & _SEGMENT_DPL) == 0 )
OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
- OFFSET(EDOMAIN_guestos_sp, struct exec_domain, arch.guestos_sp);
+ OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp);
BLANK();
OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
* We're basically forcing default RPLs to 1, so that our "what privilege
* level are we returning to?" logic works.
*/
- ed->arch.failsafe_selector = FLAT_GUESTOS_CS;
- ed->arch.event_selector = FLAT_GUESTOS_CS;
- ed->arch.guestos_ss = FLAT_GUESTOS_SS;
+ ed->arch.failsafe_selector = FLAT_KERNEL_CS;
+ ed->arch.event_selector = FLAT_KERNEL_CS;
+ ed->arch.kernel_ss = FLAT_KERNEL_SS;
for ( i = 0; i < 256; i++ )
- ed->arch.traps[i].cs = FLAT_GUESTOS_CS;
+ ed->arch.traps[i].cs = FLAT_KERNEL_CS;
/* WARNING: The new domain must have its 'processor' field filled in! */
phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
create_bounce_frame:
/* Push new frame at existing %rsp if already in guest-OS mode. */
movq XREGS_rsp+8(%rsp),%rsi
- testb $TF_guestos_mode,EDOMAIN_thread_flags(%rbx)
+ testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
jnz 1f
/* Push new frame at registered guest-OS stack top. */
- movq EDOMAIN_guestos_sp(%rbx),%rsi
+ movq EDOMAIN_kernel_sp(%rbx),%rsi
1: movq $HYPERVISOR_VIRT_START,%rax
cmpq %rax,%rsi
jb 1f # In +ve address space? Then okay.
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
movb $0,TRAPBOUNCE_flags(%rdx)
- testb $TF_guestos_mode,EDOMAIN_thread_flags(%rbx)
+ testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
jnz 1f
- orb $TF_guestos_mode,EDOMAIN_thread_flags(%rbx)
+ orb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
swapgs
/* XXX switch page tables XXX */
1: movl $TRAP_syscall,XREGS_entry_vector+8(%rsp)
{
if ( (ss & 3) != 3 )
return -EPERM;
- current->arch.guestos_ss = ss;
- current->arch.guestos_sp = esp;
+ current->arch.kernel_ss = ss;
+ current->arch.kernel_sp = esp;
return 0;
}
ret = -EINVAL;
if ( d != NULL )
{
- ret = final_setup_guestos(d, &op->u.builddomain);
+ ret = final_setup_guest(d, &op->u.builddomain);
put_domain(d);
}
}
/*
- * final_setup_guestos is used for final setup and launching of domains other
+ * final_setup_guest is used for final setup and launching of domains other
* than domain 0. ie. the domains that are being built by the userspace dom0
* domain builder.
*/
-int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
+int final_setup_guest(struct domain *p, dom0_builddomain_t *builddomain)
{
int rc = 0;
full_execution_context_t *c;
goto out;
}
- if ( (rc = arch_final_setup_guestos(p->exec_domain[0],c)) != 0 )
+ if ( (rc = arch_final_setup_guest(p->exec_domain[0],c)) != 0 )
goto out;
/* Set up the shared info structure. */
}
/*
- * final_setup_guestos is used for final setup and launching of domains other
+ * final_setup_guest is used for final setup and launching of domains other
* than domain 0. ie. the domains that are being built by the userspace dom0
* domain builder.
*/
sched_add_domain(ed);
- if ( (rc = arch_final_setup_guestos(ed, c)) != 0 ) {
+ if ( (rc = arch_final_setup_guest(ed, c)) != 0 ) {
sched_rem_domain(ed);
goto out;
}
(((_s)>>3) > LAST_RESERVED_GDT_ENTRY) || \
((_s)&4)) && \
(((_s)&3) == 1))
-#define VALID_CODESEL(_s) ((_s) == FLAT_GUESTOS_CS || VALID_SEL(_s))
+#define VALID_CODESEL(_s) ((_s) == FLAT_KERNEL_CS || VALID_SEL(_s))
/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE (15<< 8)
struct arch_exec_domain
{
- unsigned long guestos_sp;
- unsigned long guestos_ss;
+ unsigned long kernel_sp;
+ unsigned long kernel_ss;
unsigned long flags; /* TF_ */
/* arch_exec_domain' flags values */
#define TF_failsafe_return 1
-#define TF_guestos_mode 2
+#define TF_kernel_mode 2
#ifndef __ASSEMBLY__
#define RING_2(_r) (((_r)->cs & 3) == 2)
#define RING_3(_r) (((_r)->cs & 3) == 3)
-#define GUESTOS_MODE(_e, _r) (!VM86_MODE(_r) && RING_1(_r))
+#define KERNEL_MODE(_e, _r) (!VM86_MODE(_r) && RING_1(_r))
#endif
#define RING_2(_r) (((_r)->cs & 3) == 2)
#define RING_3(_r) (((_r)->cs & 3) == 3)
-#define GUESTOS_MODE(_e, _r) ((_e)->arch.flags & TF_guestos_mode)
+#define KERNEL_MODE(_e, _r) ((_e)->arch.flags & TF_kernel_mode)
#endif
#define FLAT_RING3_DS 0x0833 /* GDT index 262 */
#define FLAT_RING3_SS 0x0833 /* GDT index 262 */
-#define FLAT_GUESTOS_CS FLAT_RING1_CS
-#define FLAT_GUESTOS_DS FLAT_RING1_DS
-#define FLAT_GUESTOS_SS FLAT_RING1_SS
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
#define FLAT_USER_CS FLAT_RING3_CS
#define FLAT_USER_DS FLAT_RING3_DS
#define FLAT_USER_SS FLAT_RING3_SS
typedef struct {
#define ECF_I387_VALID (1<<0)
#define ECF_VMX_GUEST (1<<1)
-#define ECF_IN_GUESTOS (1<<2)
+#define ECF_IN_KERNEL (1<<2)
unsigned long flags;
execution_context_t cpu_ctxt; /* User-level CPU registers */
char fpu_ctxt[256]; /* User-level FPU registers */
unsigned int fast_trap_idx; /* "Fast trap" vector offset */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
- unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
+ unsigned long kernel_ss, kernel_esp; /* Virtual TSS (only SS1/ESP1) */
unsigned long pt_base; /* CR3 (pagetable base) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
unsigned long event_callback_cs; /* CS:EIP of event callback */
#define FLAT_RING3_SS32 0x082b /* GDT index 262 */
#define FLAT_RING3_SS64 0x082b /* GDT index 262 */
-#define FLAT_GUESTOS_DS64 FLAT_RING3_DS64
-#define FLAT_GUESTOS_DS32 FLAT_RING3_DS32
-#define FLAT_GUESTOS_DS FLAT_GUESTOS_DS64
-#define FLAT_GUESTOS_CS64 FLAT_RING3_CS64
-#define FLAT_GUESTOS_CS32 FLAT_RING3_CS32
-#define FLAT_GUESTOS_CS FLAT_GUESTOS_CS64
-#define FLAT_GUESTOS_SS64 FLAT_RING3_SS64
-#define FLAT_GUESTOS_SS32 FLAT_RING3_SS32
-#define FLAT_GUESTOS_SS FLAT_GUESTOS_SS64
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
#define FLAT_USER_DS64 FLAT_RING3_DS64
#define FLAT_USER_DS32 FLAT_RING3_DS32
typedef struct {
#define ECF_I387_VALID (1<<0)
#define ECF_VMX_GUEST (1<<1)
-#define ECF_IN_GUESTOS (1<<2)
+#define ECF_IN_KERNEL (1<<2)
unsigned long flags;
execution_context_t cpu_ctxt; /* User-level CPU registers */
char fpu_ctxt[512]; /* User-level FPU registers */
trap_info_t trap_ctxt[256]; /* Virtual IDT */
unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
- unsigned long guestos_ss, guestos_esp; /* Virtual TSS (only SS1/ESP1) */
+ unsigned long kernel_ss, kernel_esp; /* Virtual TSS (only SS1/ESP1) */
unsigned long pt_base; /* CR3 (pagetable base) */
unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
unsigned long event_callback_cs; /* CS:EIP of event callback */
} PACKED vcpu_info_t; /* 8 + arch */
/*
- * Xen/guestos shared data -- pointer provided in start_info.
+ * Xen/kernel shared data -- pointer provided in start_info.
* NB. We expect that this struct is smaller than a page.
*/
typedef struct shared_info_st
extern void arch_do_boot_vcpu(struct exec_domain *ed);
-extern int arch_final_setup_guestos(
+extern int arch_final_setup_guest(
struct exec_domain *d, full_execution_context_t *c);
extern void free_perdomain_pt(struct domain *d);
unsigned long image_start, unsigned long image_len,
unsigned long initrd_start, unsigned long initrd_len,
char *cmdline);
-extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);
+extern int final_setup_guest(struct domain *d, dom0_builddomain_t *);
struct domain *find_domain_by_id(domid_t dom);
struct domain *find_last_domain(void);